testl $~0,(r(dx),r(ax),1)
jnz .Lsvm_process_softirqs
+ /* Only check the nested p2m when the nested guest uses HAP; with
+  * shadow paging a NULL nv_p2m is legitimate, so skip ahead.
+  * NB: "testb $0xff" sets ZF iff the byte is zero.  A "testb $0"
+  * here would AND with 0, always setting ZF and making the jz
+  * unconditional (turning the p2m check below into dead code). */
+ testb $0xff, VCPU_nsvm_hap_enabled(r(bx))
+ jz .Lsvm_asid_handle
+
mov VCPU_nhvm_p2m(r(bx)),r(ax)
test r(ax),r(ax)
sete %al
andb VCPU_nhvm_guestmode(r(bx)),%al
jnz .Lsvm_nsvm_no_p2m
+.Lsvm_asid_handle:
call svm_asid_handle_vmrun
cmpb $0,addr_of(tb_init_done)
/* host nested paging + guest shadow paging. */
n2vmcb->_np_enable = 1;
/* Keep h_cr3 as it is: propagate the l1 VMCB's host nested-paging
 * root into the l2 VMCB so the host p2m keeps translating for l2. */
+ n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
/* When l1 guest does shadow paging
 * we assume it intercepts page faults.
 */
OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
+ /* Expose the nested-SVM hap-enabled flag to asm; the VMRUN entry
+  * path tests this byte to decide whether to validate nv_p2m. */
+ OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
/* NOTE(review): this hunk mirrors the one above — presumably the
 * second asm-offsets.c variant (32- vs 64-bit build); confirm both
 * copies stay in sync. */
OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
+ /* Same offset as in the sibling hunk, for this build's asm. */
+ OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
BLANK();
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);